# Standard imports: data handling, numerics, plotting, and model evaluation.
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix

# Load the iris dataset: semicolon-separated values, '.' as decimal mark,
# first row is the header. (This line was fused with the import above in
# the scraped source.)
datos_iris = pd.read_csv("../datos/iris.csv", sep = ";", decimal = ".", header = 0)
# Echo the loaded table: four numeric predictors plus the class column
# "tipo". The captured output below shows 150 rows x 5 columns.
datos_iris
## s.largo s.ancho p.largo p.ancho tipo
## 0 5.1 3.5 1.4 0.2 setosa
## 1 4.9 3.0 1.4 0.2 setosa
## 2 4.7 3.2 1.3 0.2 setosa
## 3 4.6 3.1 1.5 0.2 setosa
## 4 5.0 3.6 1.4 0.2 setosa
## .. ... ... ... ... ...
## 145 6.7 3.0 5.2 2.3 virginica
## 146 6.3 2.5 5.0 1.9 virginica
## 147 6.5 3.0 5.2 2.0 virginica
## 148 6.2 3.4 5.4 2.3 virginica
## 149 5.9 3.0 5.1 1.8 virginica
##
## [150 rows x 5 columns]
from sklearn.preprocessing import LabelEncoder

# Predictor matrix: the four measurement columns; target: the species column.
x = np.array(datos_iris.iloc[:, 0:4])
y = datos_iris["tipo"].ravel()

# Re-code the string labels as integers 0 .. n_classes-1.
encoder = LabelEncoder()
y = encoder.fit_transform(y)
print(y)
## [0 ... 0 1 ... 1 2 ... 2]  (50 of each class, in order)
encoder.classes_  ## array(['setosa', 'versicolor', 'virginica'], dtype=object)
# Train/test split: 75% for training, fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.75, random_state = 0)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# The Sequential model starts empty; layers are appended one by one.
modelo = Sequential()
# Input + first hidden layer. input_shape=(4,) because there are 4 predictors.
modelo.add(Dense(units = 40, activation='relu', input_shape=(4,)))
# Second hidden layer.
modelo.add(Dense(units = 20, activation='relu'))
# Output layer: units=3, one per class.
# FIX: 'softmax' instead of 'sigmoid' so the outputs form a probability
# distribution over the 3 classes, which is what the
# sparse_categorical_crossentropy loss below expects.
modelo.add(Dense(units = 3, activation='softmax'))

# Configure training: Adam optimizer, integer-label cross-entropy loss.
modelo.compile(
    optimizer='adam', loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])

# Train silently for 200 epochs.
modelo.fit(x_train, y_train, epochs = 200, batch_size = 16, verbose = 0)
## <keras.callbacks.History object at 0x7fd787adaa00>
# Evaluate on the held-out test set.
loss, accu = modelo.evaluate(x_test, y_test, verbose = 0)
accu  ## 0.9736841917037964
# Per-class probability predictions for the test set.
pred = modelo.predict(x_test)
## 1/2 [==============>...............] - ETA: 0s
## 2/2 [==============================] - 0s 2ms/step
pred[0:5]
## array([[1.2365290e-05, 9.9637824e-01, 9.9999940e-01],
##        [4.7529429e-01, 9.9929166e-01, 4.6571612e-01],
##        [1.0000000e+00, 9.9996185e-01, 1.5392176e-08],
##        [1.8377055e-05, 9.9930942e-01, 9.9999839e-01],
##        [9.9999982e-01, 9.9983883e-01, 2.0162756e-07]], dtype=float32)
# Keep the most probable class per row.
pred = np.argmax(pred, axis = -1)
print("Predicciones: ", pred, "\n")
## Predicciones: [2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0
## 2]
# Map the integer labels back to the original class names with the encoder.
pred = encoder.inverse_transform(pred)
print(pred)
## ['virginica' 'versicolor' 'setosa' 'virginica' 'setosa' 'virginica'
## 'setosa' 'versicolor' 'versicolor' 'versicolor' 'virginica' 'versicolor'
## 'versicolor' 'versicolor' 'versicolor' 'setosa' 'versicolor' 'versicolor'
## 'setosa' 'setosa' 'virginica' 'versicolor' 'setosa' 'setosa' 'virginica'
## 'setosa' 'setosa' 'versicolor' 'versicolor' 'setosa' 'virginica'
## 'versicolor' 'setosa' 'virginica' 'virginica' 'versicolor' 'setosa'
## 'virginica']
def indices_general(MC, nombres = None):
    """Return accuracy statistics computed from a confusion matrix.

    Parameters
    ----------
    MC : np.ndarray
        Square confusion matrix (rows = true classes, columns = predictions).
    nombres : list of str, optional
        Column names for the per-class precision table.

    Returns
    -------
    dict with the confusion matrix, global accuracy, global error, and a
    one-row DataFrame of per-class accuracy (diagonal / row sums).
    """
    precision_global = np.sum(MC.diagonal()) / np.sum(MC)
    error_global = 1 - precision_global
    precision_categoria = pd.DataFrame(MC.diagonal()/np.sum(MC,axis = 1)).T
    # FIX: use an identity check instead of "!= None" (idiomatic and avoids
    # relying on the element-wise semantics of == on container types).
    if nombres is not None:
        precision_categoria.columns = nombres
    # FIX: the "Precisión por categoría" key was mojibake ("categorÃa").
    return {"Matriz de Confusión": MC, "Precisión Global": precision_global,
            "Error Global": error_global, "Precisión por categoría": precision_categoria}
# Recover the string labels for y_test, then build the confusion matrix with
# rows/columns ordered by the encoder's class names.
y_test_classes = encoder.inverse_transform(y_test)
MC = confusion_matrix(y_test_classes, pred, labels = encoder.classes_)
indices = indices_general(MC, list(encoder.classes_))
# Pretty-print every computed index (indentation restored from the scrape).
for k in indices:
    print("\n%s:\n%s"%(k,str(indices[k])))
##
## Matriz de Confusión:
## [[13 0 0]
## [ 0 15 1]
## [ 0 0 9]]
##
## Precisión Global:
## 0.9736842105263158
##
## Error Global:
## 0.02631578947368418
##
## Precisión por categoría:
## setosa versicolor virginica
## 0 1.0 0.9375 1.0
Una convolución es simplemente una operación matemática que se realiza sobre los valores de los píxeles de una imagen.
Se utilizan diferentes núcleos (kernels) o filtros para procesar una imagen hacia una estructura conveniente para realizar cierta tarea, o simplemente por estética.
import cv2

# Load the sample image; cv2 reads BGR, [..., ::-1] reverses channels to RGB.
img = cv2.imread('../datos/img/mapache.png')[...,::-1]
plt.imshow(img, cmap = "gray")
plt.show()

# FIX: the kernels were flattened into 1x9 row vectors by the scrape; these
# classic convolution filters are 3x3 matrices.
identidad = np.array(
    [[0, 0, 0],
     [0, 1, 0],
     [0, 0, 0]])
desenfoque = (1/9) * np.array(
    [[1, 1, 1],
     [1, 1, 1],
     [1, 1, 1]])
enfoque = np.array(
    [[0, -1, 0],
     [-1, 5, -1],
     [0, -1, 0]])
emboss = np.array(
    [[-2, -1, 0],
     [-1, 1, 1],
     [0, 1, 2]])
kernels = [identidad, desenfoque, enfoque, emboss]
kernel_name = ["identidad", "desenfoque", "enfoque", "emboss"]
# Apply each kernel and show the four filtered images in a 2x2 grid.
figure, axis = plt.subplots(2, 2, figsize = (12, 10))
for kernel, name, ax in zip(kernels, kernel_name, axis.flatten()):
    img_convolusion = cv2.filter2D(img, -1, kernel)
    ax.imshow(abs(img_convolusion), cmap = "gray")
    ax.set_title(name)
plt.show()

import os
import re
import tensorflow.keras as keras
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.utils import plot_model
from tensorflow.keras.datasets import mnist

# Load the MNIST dataset: 60k training and 10k test 28x28 grayscale digits.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print("x_train.shape: ", x_train.shape)  ## x_train.shape: (60000, 28, 28)
print("y_train.shape: ", y_train.shape)  ## y_train.shape: (60000,)
print("x_test.shape: ", x_test.shape)    ## x_test.shape: (10000, 28, 28)
print("y_test.shape: ", y_test.shape)    ## y_test.shape: (10000,)
# Example of one image: print the raw 28x28 pixel matrix of the first digit
# (values 0-255; widened frame so pandas prints it on one block).
pd.set_option('expand_frame_repr', False)
print(pd.DataFrame((x_train[0])))
## 28x28 integer matrix, mostly 0 with a block of values up to 255 that
## draws the digit "5" (full dump omitted here).
plt.imshow(x_train[0], cmap = "gray")
plt.show()

# 8x8 grid with the first 64 training digits.
fig, ax = plt.subplots(8, 8, figsize=(6, 6), dpi=100)
for i, axi in enumerate(ax.flat):
    no_imprimir = axi.imshow(x_train[i], cmap='binary', vmin=0, vmax=255)
    no_imprimir = axi.set(xticks=[], yticks=[])
plt.show()

# Same grid for the first 64 test digits.
fig, ax = plt.subplots(8, 8, figsize=(6, 6), dpi=100)
for i, axi in enumerate(ax.flat):
    no_imprimir = axi.imshow(x_test[i], cmap='binary', vmin=0, vmax=255)
    no_imprimir = axi.set(xticks=[], yticks=[])
plt.show()
# Number of distinct classes among the labels.
num_labels = len(np.unique(y_train))
# One-hot encode both label vectors.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# Side length of the square input images.
image_size = x_train.shape[1]
# Add the single grayscale channel: (n, 28, 28) -> (n, 28, 28, 1).
x_train = x_train.reshape(-1, image_size, image_size, 1)
x_test = x_test.reshape(-1, image_size, image_size, 1)
# Scale pixel intensities to [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# ----- Hyper-parameters -----
# Images are processed in grayscale, hence one channel.
input_shape = (image_size, image_size, 1)
kernel_size = 3
pool_size = 2
filters = 64
dropout = 0.2
# Convolutional network for MNIST: three Conv2D blocks, then dense layers.
model = Sequential()
model.add(Conv2D(filters=filters,
kernel_size=kernel_size,
activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size))
model.add(Conv2D(filters=filters,
kernel_size=kernel_size,
activation='relu'))
model.add(MaxPooling2D(pool_size))
model.add(Conv2D(filters=filters,
kernel_size=kernel_size,
activation='relu'))
# Flatten the feature maps into a vector.
model.add(Flatten())
# dropout added as regularizer
model.add(Dropout(dropout))
# Hidden layers.
# NOTE(review): a sigmoid hidden layer is unusual (relu is customary);
# the captured run below still reaches ~99.3% accuracy.
model.add(Dense(40, activation = "relu"))
model.add(Dense(20, activation = "sigmoid"))
# The output layer must be a 10-dim one-hot vector (one unit per digit).
model.add(Dense(num_labels))
model.add(Activation('softmax'))
# Summary of the network structure.
model.summary()
# Helper to draw a graph of the network structure.
# Requires graphviz and pydot.
#plot_model(model, to_file='cnn-mnist.png', show_shapes=True)
#img = mpimg.imread('cnn-mnist.png')
#plt.imshow(img)
#plt.show()
## Model: "sequential_1"
## _________________________________________________________________
## Layer (type) Output Shape Param #
## =================================================================
## conv2d (Conv2D) (None, 26, 26, 64) 640
##
## max_pooling2d (MaxPooling2D (None, 13, 13, 64) 0
## )
##
## conv2d_1 (Conv2D) (None, 11, 11, 64) 36928
##
## max_pooling2d_1 (MaxPooling (None, 5, 5, 64) 0
## 2D)
##
## conv2d_2 (Conv2D) (None, 3, 3, 64) 36928
##
## flatten (Flatten) (None, 576) 0
##
## dropout (Dropout) (None, 576) 0
##
## dense_3 (Dense) (None, 40) 23080
##
## dense_4 (Dense) (None, 20) 820
##
## dense_5 (Dense) (None, 10) 210
##
## activation (Activation) (None, 10) 0
##
## =================================================================
## Total params: 98,606
## Trainable params: 98,606
## Non-trainable params: 0
## _________________________________________________________________
# Loss function for one-hot targets.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Train the model.
batch_size = 128
model.fit(x_train, y_train, epochs=10, batch_size=batch_size, verbose = 0)
## <keras.callbacks.History object at 0x7fd76d818d00>
salida, acc = model.evaluate(x_test,
                             y_test,
                             batch_size=batch_size,
                             verbose=0)
print("\nPrecisión Global: %.1f%%" % (100.0 * acc))
## Precisión Global: 99.3%

# Reload the raw mnist data; only the test tuple is unpacked (the training
# part is kept as a single tuple in `train` and not used further).
train, (x_test, y_test) = mnist.load_data()
# Shape the test images the way the model expects: (n, 28, 28, 1), [0, 1].
x_test_model = np.reshape(x_test,[-1, 28, 28, 1])
x_test_model = x_test_model.astype('float32') / 255
print(x_test_model.shape)
## (10000, 28, 28, 1)
# Plot the digits we are about to classify.
fig, ax = plt.subplots(4, 4, figsize=(6, 6), dpi=100)
for i, axi in enumerate(ax.flat):
    no_imprimir = axi.imshow(x_test[i], cmap = 'gray', vmin=0, vmax=255)
    no_imprimir = axi.set(xticks=[], yticks=[])
plt.show()
# Predict 16 images.
pred = model.predict(x_test_model[0:16])
## 1/1 [==============================] - 0s 83ms/step
# Most probable class per image.
pred = np.argmax(pred, axis = -1)
print("Predicciones en fila: ",pred,"\n")
## Predicciones en fila: [7 2 1 0 4 1 4 9 5 9 0 6 9 0 1 5]
print("Predicciones 4x4:\n", np.reshape(pred,[4,4]))
## Predicciones 4x4:
## [[7 2 1 0]
## [4 1 4 9]
## [5 9 0 6]
## [9 0 1 5]]
import os
import re
import cv2
import tensorflow.keras as keras
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import LeakyReLU

# Show one sample image per class (cv2 loads BGR; [..., ::-1] -> RGB).
img = cv2.imread('datos/climas/Shine/shine95.jpg')[...,::-1]
plt.imshow(img)
plt.show()
img = cv2.imread('datos/climas/Rain/rain120.jpg')[...,::-1]
plt.imshow(img)
plt.show()
img = cv2.imread('datos/climas/Cloudy/cloudy249.jpg')[...,::-1]
plt.imshow(img)
plt.show()

# Walk every class folder, load each image, resize it to 64x64 and record
# its label (the folder name). Unreadable files are reported and skipped.
labels = []
imagenes = []
for carpeta in next(os.walk('datos/climas/'))[1]:
    for nombrearchivo in next(os.walk('datos/climas/' + '/' + carpeta))[2]:
        if re.search("\\.(jpg|jpeg|png|bmp|tiff)$", nombrearchivo):
            try:
                img = cv2.imread('datos/climas/' + '/' + carpeta + '/' + nombrearchivo)[...,::-1]
                img = cv2.resize(img, (64, 64))
                imagenes.append(img)
                labels.append(carpeta)
            # FIX: narrowed from a bare except so Ctrl-C etc. still propagate;
            # best-effort skip with a message is preserved.
            except Exception:
                print("No se pudo cargar la imagen: " + nombrearchivo + " en la carpeta: " + carpeta)
## No se pudo cargar la imagen: rain141.jpg en la carpeta: Rain
## No se pudo cargar la imagen: shine131.jpg en la carpeta: Shine

X = np.array(imagenes, dtype = np.float32)
y = np.array(labels)
print(
    'Total de individuos: ', len(X),
    '\nNúmero total de salidas: ', len(np.unique(y)),
    '\nClases de salida: ', np.unique(y))
## Total de individuos: 1123
## Número total de salidas: 4
## Clases de salida: ['Cloudy' 'Rain' 'Shine' 'Sunrise']
# Split into train/test sets (5% held out for testing).
train_X, test_X, train_Y, test_Y = train_test_split(X, y, test_size = 0.05)
# Scale pixel values to [0, 1].
train_X = train_X / 255.
test_X = test_X / 255.
# One-hot encode the categorical labels.
train_Y_one_hot = pd.get_dummies(train_Y)
train_Y_one_hot = train_Y_one_hot.to_numpy()
test_Y_one_hot = pd.get_dummies(test_Y)
test_Y_one_hot = test_Y_one_hot.to_numpy()
train_Y_one_hot
## array([[0, 0, 1, 0],
##        [0, 0, 1, 0],
##        [1, 0, 0, 0],
##        ...,
##        [0, 0, 1, 0],
##        [1, 0, 0, 0],
##        [0, 0, 1, 0]], dtype=uint8)
# Climate-image CNN: one Conv2D block with LeakyReLU, then a small dense head.
epochs = 50
batch_size = 64
modelo_clima = Sequential()
# 64 filters over 64x64 RGB input; 'linear' activation because LeakyReLU
# is added as a separate layer right after.
modelo_clima.add(Conv2D(64, kernel_size=(3, 3),
activation='linear',padding='same',input_shape=(64,64,3)))
modelo_clima.add(LeakyReLU(alpha=0.1))
modelo_clima.add(MaxPooling2D((2, 2),padding='same'))
modelo_clima.add(Dropout(0.5))
modelo_clima.add(Flatten())
modelo_clima.add(Dense(32, activation='linear'))
modelo_clima.add(LeakyReLU(alpha=0.1))
modelo_clima.add(Dropout(0.5))
# Output layer: one unit per class, softmax probabilities.
modelo_clima.add(Dense(len(np.unique(y)), activation='softmax'))
modelo_clima.summary()## Model: "sequential_2"
## _________________________________________________________________
## Layer (type) Output Shape Param #
## =================================================================
## conv2d_3 (Conv2D) (None, 64, 64, 64) 1792
##
## leaky_re_lu (LeakyReLU) (None, 64, 64, 64) 0
##
## max_pooling2d_2 (MaxPooling (None, 32, 32, 64) 0
## 2D)
##
## dropout_1 (Dropout) (None, 32, 32, 64) 0
##
## flatten_1 (Flatten) (None, 65536) 0
##
## dense_6 (Dense) (None, 32) 2097184
##
## leaky_re_lu_1 (LeakyReLU) (None, 32) 0
##
## dropout_2 (Dropout) (None, 32) 0
##
## dense_7 (Dense) (None, 4) 132
##
## =================================================================
## Total params: 2,099,108
## Trainable params: 2,099,108
## Non-trainable params: 0
## _________________________________________________________________
INIT_LR = 1e-3
# FIX: use `learning_rate` — the `lr` alias is deprecated, as the captured
# UserWarning below showed.
modelo_clima.compile(loss=keras.losses.categorical_crossentropy,
                     optimizer=keras.optimizers.Adagrad(learning_rate=INIT_LR),
                     metrics=['accuracy'])
# Train the model.
modelo_clima.fit(train_X, train_Y_one_hot, batch_size = batch_size,
                 epochs = epochs, verbose = 0)
## <keras.callbacks.History object at 0x7fd76fb34340>
# Save the network so it can be reused later without retraining.
modelo_clima.save("cnn_clima.h5py")
## WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op ...
from tensorflow.keras.models import load_model
modelo_clima = load_model('cnn_clima.h5py')
# Evaluate against the test set.
test_eval = modelo_clima.evaluate(test_X, test_Y_one_hot, verbose=0)
print('\nTest loss:', test_eval[0])
## Test loss: 0.5229036211967468
print('Test accuracy:', test_eval[1])
## Test accuracy: 0.7368420958518982
from urllib.request import Request, urlopen

def _descargar_imagen(url):
    """Download an image over HTTP and decode it to an RGB numpy array."""
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    # Context manager ensures the connection is closed (the original leaked it).
    with urlopen(req) as response:
        arr = np.asarray(bytearray(response.read()), dtype=np.uint8)
    # cv2 decodes to BGR; [..., ::-1] reverses the channel order to RGB.
    return cv2.imdecode(arr, -1)[...,::-1]

# Four out-of-sample photos, one per expected class.
img_1 = _descargar_imagen('https://images.pexels.com/photos/1870259/pexels-photo-1870259.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500')
img_2 = _descargar_imagen('https://parquesalegres.org/wp-content/uploads/2018/06/lluvia-4-1023x767.jpg')
img_3 = _descargar_imagen('https://www.surfertoday.com/images/jamp/page/sunrisesunsettime.jpg')
img_4 = _descargar_imagen('https://us.123rf.com/450wm/candy18/candy181801/candy18180100138/94577205-panorama-de-asfalto-de-carreteras-en-el-campo-en-un-d%C3%ADa-soleado-de-primavera-ruta-en-el-bello-paisaj.jpg?ver=6')

# FIX: the images have different shapes, so an object array is required
# (the captured VisibleDeprecationWarning said exactly this).
imgs = np.array([img_1, img_2, img_3, img_4], dtype=object)
fig, ax = plt.subplots(1, 4, figsize=(6, 6), dpi=100)
for i, axi in enumerate(ax.flat):
    axi.imshow(imgs[i])
    axi.set(xticks=[], yticks=[])
plt.show()
# Prepare the data the way the model requires: 64x64, float32, scaled to [0, 1].
imgs = np.array([cv2.resize(img_1, (64, 64)), cv2.resize(img_2, (64, 64)), cv2.resize(img_3, (64, 64)), cv2.resize(img_4, (64, 64))])
imgs = imgs.astype('float32')
imgs = imgs / 255.
pred = modelo_clima.predict(imgs)
## 1/1 [==============================] - 0s 65ms/step
# Integer class indices, then mapped back to the class names.
print("\nPredicciones: ", np.argmax(pred, axis = 1))
## Predicciones: [0 1 3 2]
print("\nPredicciones: ", np.unique(y)[np.argmax(pred, axis = 1)])
## Predicciones: ['Cloudy' 'Rain' 'Sunrise' 'Shine']